Libraries
# Libraries: survival (Surv objects) and FRESA.CAD (BSWiMS modelling,
# RRPlot, risk-calibration utilities).  The "##" lines below are the
# package-loading console output captured by the knitted document.
library(survival)
library(FRESA.CAD)
## Loading required package: Rcpp
## Loading required package: stringr
## Loading required package: miscTools
## Loading required package: Hmisc
##
## Attaching package: 'Hmisc'
## The following objects are masked from 'package:base':
##
## format.pval, units
## Loading required package: pROC
## Type 'citation("pROC")' for a citation.
##
## Attaching package: 'pROC'
## The following objects are masked from 'package:stats':
##
## cov, smooth, var
# Snapshot the default graphics parameters so plotting code can restore them.
op <- par(no.readonly = TRUE)
# Global pander formatting: 3 significant digits, keep trailing zeros.
pander::panderOptions('digits', 3)
#pander::panderOptions('table.split.table', 400)
pander::panderOptions('keep.trailing.zeros',TRUE)
Cox Model Performance
Here we evaluate the model using the RRPlot() function.
The evaluation of the raw Cox model with RRPlot()
Here we will use the predicted event probability assuming a baseline
hazard for events within 5 years.
# Linear predictor of the fitted Cox model for every subject in the cohort.
index <- predict(ml, dataBreast)

# Observation window: twice the mean event time of the observed cases.
eventTimes <- subset(dataBreast, status == 1)$time
timeinterval <- 2 * mean(eventTimes)

# Baseline hazard h0: events occurring inside the window, divided by the
# subjects informative for the window (followed past it, or known events).
eventsInWindow <- sum(dataBreast$status & dataBreast$time <= timeinterval)
informative <- sum((dataBreast$time > timeinterval) | (dataBreast$status == 1))
h0 <- eventsInWindow / informative

pander::pander(t(c(h0 = h0, timeinterval = timeinterval)),
               caption = "Initial Parameters")
Initial Parameters

|  h0   | timeinterval |
|:-----:|:------------:|
| 0.323 |     51.1     |
# Pair each subject's observed status with the predicted probability of at
# least one event within the interval (Poisson model with baseline hazard h0).
rdata <- cbind(dataBreast$status,ppoisGzero(index,h0))
rownames(rdata) <- rownames(dataBreast)
# Risk-ratio analysis of the raw (uncalibrated) model on the training data.
# atProb=0.90 presumably sets the probability quantile that defines the
# high-risk class — confirm against the RRPlot documentation.
rrAnalysisTrain <- RRPlot(rdata,atProb=c(0.90),
timetoEvent=dataBreast$time,
title="Raw Train: Breast Cancer",
ysurvlim=c(0.00,1.0),
riskTimeInterval=timeinterval)






As we can see, the observed probability as well as the time-vs-events
curves are not calibrated.
Cox Calibration
op <- par(no.readonly = TRUE)
# Calibrate the Cox model risk on the full data set: estimates the baseline
# hazard (h0), a hazard gain, and the time interval from the observed
# "status" and "time" columns.
calprob <- CoxRiskCalibration(ml,dataBreast,"status","time")
pander::pander(c(h0=calprob$h0,
Gain=calprob$hazardGain,
DeltaTime=calprob$timeInterval),
caption="Cox Calibration Parameters")
Cross-Validation
Here we use the estimated h0 and timeinterval from the full set
# 100 repetitions of holdout cross-validation: each repetition fits a BSWiMS
# survival model on a random 90% training split and predicts the held-out 10%.
# classSamplingType="Pro" presumably requests proportional (stratified)
# sampling of the outcome classes — confirm against the randomCV docs.
rcv <- randomCV(theData=dataBreast,
theOutcome = Surv(time,status)~1,
fittingFunction=BSWiMS.model,
trainFraction = 0.9,
repetitions=100,
classSamplingType = "Pro"
)
.[+++++].[+++].[++++].[++++].[-+].[--].[--].[++++++].[+++++].[++++++]10
Tested: 118 Avg. Selected: 3.4 Min Tests: 1 Max Tests: 5 Mean Tests:
1.694915 . MAD: 0.452554
.[+++].[+++].[++++++].[++++].[+++++].[+++++].[++++++].[+++++++++].[++].[++++]20
Tested: 167 Avg. Selected: 4.35 Min Tests: 1 Max Tests: 6 Mean Tests:
2.39521 . MAD: 0.4697117
.[+++].[+++++].[+++++].[+++++].[+++++].[+++++++].[+++++++].[++++++].[+++].[+++++++]30
Tested: 184 Avg. Selected: 4.866667 Min Tests: 1 Max Tests: 8 Mean
Tests: 3.26087 . MAD: 0.4737094
.[++++++].[++++].[+++].[++++++].[+++++].[++++].[++++].[--].[++++].[++++++]40
Tested: 192 Avg. Selected: 4.925 Min Tests: 1 Max Tests: 10 Mean Tests:
4.166667 . MAD: 0.4739559
.[+++++].[+++++++].[++++++].[++++].[+++++].[++++++].[++++++].[++++++].[++++].[++++]50
Tested: 193 Avg. Selected: 5.12 Min Tests: 1 Max Tests: 11 Mean Tests:
5.181347 . MAD: 0.4751408
.[+++].[++++++++].[++++++].[++++].[+].[++].[++++++++].[++++++++].[++++++].[+++]60
Tested: 194 Avg. Selected: 5.183333 Min Tests: 1 Max Tests: 13 Mean
Tests: 6.185567 . MAD: 0.4779008
.[-+++].[++++].[++++].[+++].[++++++].[+++++++++].[++++++].[+++++].[++++].[++++]70
Tested: 194 Avg. Selected: 5.185714 Min Tests: 1 Max Tests: 15 Mean
Tests: 7.216495 . MAD: 0.479849
.[+++].[++++].[--].[+++].[+++++].[++++++].[+++].[+++++++].[+++++++].[++]80
Tested: 194 Avg. Selected: 5.0875 Min Tests: 1 Max Tests: 17 Mean Tests:
8.247423 . MAD: 0.4792785
.[++++].[+++++].[+++++].[++++].[++++].[+++].[+++++].[++++].[+].[+++++++]90
Tested: 194 Avg. Selected: 5.033333 Min Tests: 2 Max Tests: 19 Mean
Tests: 9.278351 . MAD: 0.4819606
.[++++].[++++++++].[++++].[+++].[+].[++++].[+].[++++++].[+].[+++++++]100
Tested: 194 Avg. Selected: 4.95 Min Tests: 2 Max Tests: 20 Mean Tests:
10.30928 . MAD: 0.4823863
# Pool the per-repetition survival test predictions from the cross-validation.
stp <- rcv$survTestPredictions
# Keep only rows with a prediction in column 4 (presumably the model's
# linear predictor/risk index — verify against randomCV's output layout).
stp <- stp[!is.na(stp[,4]),]
# Per-subject medians across CV repetitions: boxplot() groups by subject
# (row name) and stats[3,] is the median of each group.
bbx <- boxplot(unlist(stp[,1])~rownames(stp),plot=FALSE)
times <- bbx$stats[3,]
status <- boxplot(unlist(stp[,2])~rownames(stp),plot=FALSE)$stats[3,]
# NOTE(review): this reuses the raw full-set h0 computed earlier, not the
# calibrated calprob$h0 — confirm the uncalibrated baseline is intended here.
prob <- ppoisGzero(boxplot(unlist(stp[,4])~rownames(stp),plot=FALSE)$stats[3,],h0)
rdatacv <- cbind(status,prob)
rownames(rdatacv) <- bbx$names
names(times) <- bbx$names
# Evaluate the CV predictions at the training-derived probability threshold.
rrAnalysisTest <- RRPlot(rdatacv,atThr = rrAnalysisTrain$thr_atP,
timetoEvent=times,
title="Test: Breast Cancer",
ysurvlim=c(0.00,1.0),
riskTimeInterval=timeinterval)






Calibrating the test results
# Recalibrate the pooled test probabilities: estimate h0, the hazard gain and
# the time interval directly from the (status, prob, times) matrix.
rdatacv <- cbind(status,prob,times)
calprob <- CalibrationProbPoissonRisk(rdatacv)
pander::pander(c(h0=calprob$h0,
Gain=calprob$hazardGain,
DeltaTime=calprob$timeInterval),
caption="Cox Calibration Parameters")
# Use the calibrated time interval and probabilities for the final evaluation.
timeinterval <- calprob$timeInterval;
rdata <- cbind(status,calprob$prob)
rrAnalysisTest <- RRPlot(rdata,atProb=c(0.90),
timetoEvent=times,
title="Calibrated Test: Breast",
ysurvlim=c(0.00,1.0),
riskTimeInterval=timeinterval)





### Calibrated Test Performance
pander::pander(t(rrAnalysisTest$OERatio),caption="O/E Ratio")
O/E Ratio

| estimate | lower | upper |
|:--------:|:-----:|:-----:|
|  0.871   | 0.638 | 1.16  |
pander::pander(t(rrAnalysisTest$OE95ci),caption="O/E Ratio")
O/E Ratio

| 1.05 | 1.05 | 1 | 1.11 |
pander::pander(t(rrAnalysisTest$OAcum95ci),caption="O/Acum Ratio")
O/Acum Ratio

| 0.904 | 0.903 | 0.891 | 0.916 |
# Concordance statistic (c-index) with its CI, then the ROC AUC of the
# calibrated risk probabilities.
pander::pander(rrAnalysisTest$c.index$cstatCI,caption="C. Index")
pander::pander(t(rrAnalysisTest$ROCAnalysis$aucs),caption="ROC AUC")
ROC AUC

|  AUC  | lower | upper |
|:-----:|:-----:|:-----:|
| 0.607 | 0.512 | 0.702 |
pander::pander((rrAnalysisTest$ROCAnalysis$sensitivity),caption="Sensitivity")
Sensitivity

| estimate | lower  | upper |
|:--------:|:------:|:-----:|
|  0.196   | 0.0936 | 0.339 |
pander::pander((rrAnalysisTest$ROCAnalysis$specificity),caption="Specificity")
Specificity

| estimate | lower | upper |
|:--------:|:-----:|:-----:|
|  0.899   | 0.838 | 0.942 |
pander::pander(t(rrAnalysisTest$thr_atP),caption="Probability Thresholds")
Probability Thresholds
| 0.369 |
pander::pander(t(rrAnalysisTest$RR_atP),caption="Risk Ratio")
Risk Ratio

| estimate | lower | upper |
|:--------:|:-----:|:-----:|
|   1.68   | 0.931 | 3.04  |
pander::pander(rrAnalysisTest$surdif,caption="Logrank test")
Logrank test: Chisq = 4.701728 on 1 degree of freedom, p = 0.030132

|         |  N  | Observed | Expected | (O-E)^2/E | (O-E)^2/V |
|:-------:|:---:|:--------:|:--------:|:---------:|:---------:|
| class=0 | 170 |    37    |   41.4   |   0.467   |    4.7    |
| class=1 | 24  |    9     |   4.6    |   4.196   |    4.7    |